Note. Boxplots display the interquartile range (IQR; center box), and the whiskers extend to 1.5 × IQR beyond the lower and upper hinges. The white point indicates the mean and the white center line indicates the median.


Data Preparation

Data Import

# workers
# initial data cleaning was done in SPSS (TODO: add the location of the syntax files — the reference "" was left blank)
# All worker SPSS exports live in one directory; build the paths from it.
spssDir <- "data/S1_Workers/processed/cleaned"
dtWorker <- list(
  raw.pre = read_spss(file.path(spssDir, "MT - Pre-Measure - 06-15-2018.sav")),
  raw.post = read_spss(file.path(spssDir, "MT - Post-Measure - 06-15-2018.sav")),
  raw.morning = read_spss(file.path(spssDir, "MT - Morning - 06-15-2018.sav")),
  raw.afternoon = read_spss(file.path(spssDir, "MT - Afternoon - 06-15-2018.sav"))
)
rm(spssDir)

# students
# raw Qualtrics CSV exports; use TRUE rather than the reassignable shorthand T
dtStudents <- list(
  raw.pre = read.csv(file = "data/S2_Students/raw/AOTS_Pre.csv", header = TRUE, sep = ","),
  raw.post = read.csv(file = "data/S2_Students/raw/AOTS_Post.csv", header = TRUE, sep = ","),
  raw.daily = read.csv(file = "data/S2_Students/raw/AOTS_Daily.csv", header = TRUE, sep = ",")
)

# young medical professionals
# data not collected yet — explicit NULL placeholders keep the list structure
# parallel to dtWorker / dtStudents
dtMedical <- list(
  raw.pre = NULL,
  raw.post = NULL,
  raw.daily = NULL
)

Data Cleaning & Data Exclusions

# variable names to keep from the Morning and Afternoon questionnaires
  # Numbered item batteries are generated with paste0(); order matches the
  # original questionnaire export.
  names.m <- c(
    # survey meta data
    "StartDate", "EndDate", "Finished", "Duration__in_seconds_",
    "RecordedDate", "ExternalReference", "Meta_Operating_System",
    # interaction basics
    "Contact_dum", "number", "time", "duration_1", "dyad.group", "gr_size",
    # group interaction: type and context
    paste0("gr_type_", 1:16), "gr_type_17_TEXT",
    paste0("gr_context_", 1:12), "gr_context_13_TEXT", "gr_context_14_TEXT",
    "gr_dutchness",
    # dyadic interaction: type and context
    paste0("dyad_type_", 1:16), "dyad_type_17_TEXT",
    paste0("Context_", 1:12), "Context_13_TEXT", "Context_14_TEXT",
    # key motive and need fulfillment (interaction reported)
    "keyMotive", "keymotive_fulfillemt_1", "keyMotive_Dutch_1",
    "autonomy_1", "competence_1", "relatedness_self_1", "relatedness_other_1",
    # interaction quality ratings
    "qualityAccidental_1", "qualityVoluntary_1", "qualityCooperative_1",
    "qualityDutchy_1", "quality_overall_1", "quality_meaning_1", "quality_star_1",
    # desired interactions
    "wantInt",
    paste0("desire_type_", 1:16), "desire_type_17_TEXT",
    paste0("desire_context_", 1:12), "desire_context_13_TEXT", "desire_context_14_TEXT",
    "Reason_nodesire",
    # need fulfillment when no interaction was reported
    "keyMotive_noInt", "keyMotive_noInt_fulf_1", "autonomy_NoInt_1",
    "competence_NoInt_1", "relatedness_1_NoInt_1",
    # attitudes and momentary well-being
    "thermometerDutch_1", "thermometerDutchInt_2", "ExWB_1",
    "alertness1", "calmness1", "valence1", "alertness2", "calmness2", "valence2",
    # interactions with non-Dutch partners
    "inNonDutch", "NonDutchNum",
    paste0("NonDutchType_", 1:14), "NonDutchType_15_TEXT",
    # timing and location
    "date", "time.0", "LocationLatitude", "LocationLongitude"
  )

  # the afternoon questionnaire has two additional key-interaction items
  names.a <- c(names.m, "keyInteraction_1", "keyInteractionTime")

# Create reduced data sets for morning and afternoon
  dat.mo <- dtWorker$raw.morning[, names.m]
  dat.mo$daytime <- "morning"
  
  dat.af <- dtWorker$raw.afternoon[, names.a]
  dat.af$daytime <- "afternoon"

# merge morning and afternoon measurements with indicator [+ clean up]
  daily.dat <- rbind.fill(dat.mo, dat.af)
  # drop the excluded participant; %in% (unlike `!=`) is NA-safe, so rows with
  # a missing ExternalReference are no longer turned into all-NA junk rows
  # (they are removed later by the !is.na(ExternalReference) filter)
  daily.dat <- daily.dat[!daily.dat$ExternalReference %in% 55951, ]
  dtWorker$daily <- daily.dat
  rm(dat.mo, dat.af, names.m, names.a, daily.dat)


# variable names to keep from the pre-measurement questionnaire
  names.pre <- c(
    "Finished",
    # demographics and living situation
    "age", "Gender", "Living", paste0("roommate_", 1:3),
    "nationality", "SecondNationality", "timeNL_1",
    # reasons for being in the Netherlands
    "Reason_2", "Reason_5", "Reason_7", "Reason_8_TEXT",
    "DutchLang",
    # occupation and education
    "occupation_1", "occupation_2", "occupation_3", "occupation_4", "occupation_7",
    "CurrentEducation_1", "education_level", "EduLang_2", "RUG_faculty",
    "Study.0", "association",
    # contact with Dutch people
    "DutchMeetNum", "DutchFriends_1",
    # acculturation and identity scales
    "assimilation", "separation", "integration", "marginalization",
    "VIA_heritage", "VIA_Dutch",
    "SSAS_surrounding", "SSAS_privat", "SSAS_public",
    # needs and well-being
    "autonomy", "relatedness", "competence", "anxiety", "swl",
    "alertness", "calmness", "valence",
    # meta data and identifier
    "date", "time", "City", "ZIP", "id"
  )
  
# reduced data set for pre measurement
  dat.pre.red <- dtWorker$raw.pre[, names.pre]

# merge with daily data [+ clean up]
# clashing column names are suffixed ".daily" / ".pre" directly via merge();
# this replaces the previous gsub("[[:punct:]]x", ...) rename, which could
# also alter matches inside unrelated column names, and is consistent with
# the suffix handling used for the student sample below
  df.pre <- merge(
    x = dtWorker$daily,
    y = dat.pre.red,
    by.x = "ExternalReference",
    by.y = "id",
    suffixes = c(".daily", ".pre"),
    all = TRUE
  )
  rm(names.pre)

# variable names to keep from the post-measurement questionnaire
  names.post <- c(
    "ExternalReference",
    # acculturation and identity scales (repeated from pre)
    "assimilation", "separation", "integration", "marginalization",
    "VIA_heritage", "VIA_Dutch",
    # well-being and adjustment
    "anxiety", "swl", "rosenberg", "social_support", "stress",
    # discrimination and negative life events
    "discrimination", "discrimination_month",
    "NLE_1month", "NLE_6month", "NLE_12month"
  )

# reduced data set for post-measurement  
  dat.post.red <- dtWorker$raw.post[, names.post]

# merge post measurement with pre- and daily data
# `by` replaces the redundant identical by.x/by.y; clashing columns are
# suffixed ".pre" / ".post" directly via merge() instead of the previous
# gsub("[[:punct:]]x", ...) rename, which could also alter matches inside
# unrelated column names
  df <- merge(
    x = df.pre,
    y = dat.post.red,
    by = "ExternalReference",
    suffixes = c(".pre", ".post"),
    all = TRUE
  )

# add to list
  dtWorker$combined <- df
  
# create data frame with cleaned data: keep only records where both the pre
# and the daily questionnaire were finished and a participant id is present
  df <- dtWorker$combined %>%
    filter(Finished.pre == 1,
           Finished.daily == 1,
           !is.na(ExternalReference))

# add running number as measurement ID within participants
  # rowidv() (data.table) numbers each participant's rows 1, 2, 3, ... in
  # their current row order
  df$measureID = rowidv(df, cols = c("ExternalReference"))
  
  df <- df %>%
    mutate(
      PID = as.numeric(factor(ExternalReference)), # participant ID
      TID = measureID-1, # time ID with t0 = 0 for meaningful intercept interpretation
      date = substr(StartDate,1,10), # date part of the "YYYY-MM-DD HH:MM:SS" timestamp (TODO: better converted to Date class)
      time = substr(StartDate,12,19), # time part ("HH:MM:SS") of the timestamp
      daynum = as.numeric(factor(date)), # all days as numeric for ordering
      daycor = ifelse(daytime=="morning" & period_to_seconds(hms(time))<period_to_seconds(hms("12:00:00")) | daytime=="afternoon" & period_to_seconds(hms(time))<period_to_seconds(hms("19:00:00")),daynum-1,daynum), # surveys submitted before 12:00 (morning) / 19:00 (afternoon) report on the PREVIOUS day; `&` binds tighter than `|`, so this reads (morning case) OR (afternoon case)
      daycor.lead = sprintf("%02d", daycor), # zero-padded day id so string sort equals numeric sort
      daytime.lt = ifelse(daytime=="morning","a","b"), # morning / afternoon to a / b 
      day_time = paste(daycor.lead, daytime.lt, sep="_"), # combine day id with morning / afternoon
      session = as.numeric(factor(day_time)), # day and time identifier as numeric id
      SubTime = chron::times(time.0), # submission time as a chron "times" object
      time.daily = as.character(time.daily),
      PPDate = as.Date(df$date.daily), # NOTE(review): df$ refers to the pre-mutate data frame; works here because date.daily is not modified within this mutate
      number = replace_na(number, 0), # missing interaction count -> 0 (no interactions reported)
      NonDutchNum = replace_na(NonDutchNum, 0) # same for non-Dutch interaction count
    )
  
  dtWorker$clean <- df
  
# clean up
  rm(df.pre, names.post, dat.post.red, dat.pre.red, df)

# Export reduced Data
  #write.csv(dtWorker$clean, "data/processed/MT_clean-merged_07-05-2018.csv", row.names = F)
  # NOTE(review): save() expects unquoted object names; `save(dtWorker$clean, ...)`
  # would fail if re-enabled — assign to a plain object first
  #save(dtWorker$clean, file = "data/processed/MT_clean-merged_07-05-2018.RData")
# our own test IDs: session keys generated while the research team tested the
# survey; all student records carrying one of these IDs are excluded below
ownIDs <- c(
  "beautifulLionfishXXXR5rcgVBzGu8hPvOqrK8UBJBw4owvi9nfRFSFu3lMzYhE",
  "niceDogoXXXmB8JI5SFu78SF3DVof84mGUPPNUr14p2HYFTtp31a6D1OwAzM6F-K",
  "amusedQuailXXXmhuc_fpTp8vPkMwDH1BzjaH1d1kHSO1bsPEfsnaEYk4WeVBfPi",
  "juwGAbtXX0_1kmZtSVqKh3PGaHOICqUyU4iBkrT3nDsI_uifuD1gzKcZerxaM5FL"
)

# Prepare dfs for Cleaning
# pre questionnaire: blank / "NA" strings -> NA; drop unfinished responses,
# duplicate e-mails (rating scales were inconsistent across repeats) and our
# own test runs
df.pre <- dtStudents$raw.pre %>%
  mutate_all(~ na_if(na_if(., ""), "NA")) %>%
  filter(!is.na(ended)) %>%
  filter(!e_mail %in% e_mail[duplicated(e_mail)]) %>%
  filter(!session %in% ownIDs) %>%
  mutate(session = as.character(session)) # factor -> character (precaution)

# post questionnaire: same NA recoding; keep only finished, non-duplicate
# sessions that also appear in the pre questionnaire; drop our own test runs
df.post <- dtStudents$raw.post %>%
  mutate_all(~ na_if(na_if(., ""), "NA")) %>%
  filter(!is.na(session)) %>%
  filter(!session %in% ownIDs) %>%
  filter(session %in% df.pre$session) %>%
  filter(!is.na(ended)) %>%
  filter(!session %in% session[duplicated(session)]) %>%
  mutate(session = as.character(session)) # factor -> character (precaution)

# daily questionnaires: same NA recoding; keep finished responses from
# pre-questionnaire participants; drop our own test runs
df.daily <- dtStudents$raw.daily %>%
  mutate_all(~ na_if(na_if(., ""), "NA")) %>%
  filter(!session %in% ownIDs) %>%
  filter(session %in% df.pre$session) %>%
  filter(!is.na(ended)) %>%
  mutate(session = as.character(session)) # factor -> character (precaution)

# merge daily with pre (inner join on session)
dfPreDaily <- merge(
  x = df.daily,
  y = df.pre,
  by = "session",
  suffixes = c(".daily", ".pre"),
  all = FALSE
)

# merge with post (inner join on session)
# NOTE(review): columns clashing here receive ".pre" even if they originated
# in the daily data; verify downstream column usage if new clashes appear
dfCombined <- merge(
  x = dfPreDaily,
  y = df.post,
  by = "session",
  suffixes = c(".pre", ".post"),
  all = FALSE
)

# add to list
dtStudents$clean <- dfCombined

# clean up workspace
rm(df.pre, df.daily, df.post, dfPreDaily, dfCombined, ownIDs)
#TBD

Calculate needed transformations

df <- dtWorker$clean

# Time and Date Variables
  # remove seconds from afternoon time: keep characters 4-8 of the time string
  # and append ":00" — NOTE(review): assumes afternoon time strings carry a
  # fixed-width prefix so positions 4-8 hold "HH:MM"; confirm against time.0
  df$SubTime[df$daytime == "afternoon"] = paste0(substring(as.character(df$time.0[df$daytime == "afternoon"]),4,8),":00") 
  # same trim for time.daily — NOTE(review): `!is.na(df$time.daily!="<NA>")` is
  # an odd guard; it is effectively !is.na(df$time.daily), the "<NA>" literal
  # comparison inside is.na() is redundant
  df$time.daily[df$daytime == "afternoon" & !is.na(df$time.daily!="<NA>")] = paste0(substring(as.character(df$time.daily[df$daytime == "afternoon" & !is.na(df$time.daily!="<NA>")]),4,8),":00")
  
  # Correct morning / afternoon date where the survey was submitted before the
  # reference time (11:50 morning, 18:50 afternoon): those refer to the
  # previous day — NOTE(review): SubTime is compared against character
  # literals; confirm chron "times" comparison semantics hold here
  df$PPDate[df$SubTime < "11:50:00" & df$daytime == "morning"] = df$PPDate[df$SubTime < "11:50:00" & df$daytime == "morning"]-1
  df$PPDate[df$SubTime < "18:50:00" & df$daytime == "afternoon"] = df$PPDate[df$SubTime < "18:50:00" & df$daytime == "afternoon"]-1
  
# Mood scales: the two item versions are mutually exclusive, so the row sum
# coalesces them — NOTE(review): rowSums(..., na.rm = TRUE) returns 0 (not NA)
# when BOTH items are missing; check whether downstream averages should treat
# those rows as missing instead
  df$calmness.daily = rowSums(df[, c("calmness1", "calmness2")], na.rm = T)
  df$alertness.daily = rowSums(df[, c("alertness1", "alertness2")], na.rm =T)
  df$valence.daily = rowSums(df[, c("valence1", "valence2")], na.rm = T)
  
# Need scales: *_1 items are answered when an interaction was reported,
# *_NoInt_1 otherwise; rowSums/rowMeans coalesce the two branches
# (same both-missing -> 0 caveat as above for the rowSums lines)
  df$keyMotiveFulfilled = rowSums(df[,c("keymotive_fulfillemt_1","keyMotive_noInt_fulf_1")], na.rm=T)
  df$autonomy.daily.all = rowSums(df[,c("autonomy_1","autonomy_NoInt_1")], na.rm=T)
  df$competence.daily.all = rowSums(df[,c("competence_1","competence_NoInt_1")], na.rm=T)
  #cor(df$relatedness_other_1, df$relatedness_self_1,use="complete.obs")
  df$relatedness.daily.all = rowMeans(df[,c("relatedness_other_1","relatedness_self_1","relatedness_1_NoInt_1")], na.rm=T)
  df$relatedness.daily.int = rowMeans(df[,c("relatedness_other_1","relatedness_self_1")], na.rm=T)

# summarize by participant: person-level aggregates of the daily measures
# (mutate, not summarise, so each person's aggregate is repeated on every row)
  between <- df %>%
    group_by(ExternalReference) %>%
    mutate(
      # NOTE(review): the four sums have no na.rm, so they stay NA for any
      # participant with a missing day — confirm this is intended
      CtContactNL = sum(Contact_dum),
      CtContactNonNl = sum(inNonDutch),
      CtContactNLAll = sum(number),
      CtContactNonNlAll = sum(NonDutchNum),
      AvKeyNeed = mean(keyMotiveFulfilled, na.rm = TRUE),
      AvKeyNeedInt = mean(keymotive_fulfillemt_1, na.rm = TRUE),
      AvKeyNeedNoInt = mean(keyMotive_noInt_fulf_1, na.rm = TRUE),
      AvAutonomy = mean(autonomy.daily.all, na.rm = TRUE),
      AvCompetence = mean(competence.daily.all, na.rm = TRUE),
      AvRelatedness = mean(relatedness.daily.all, na.rm = TRUE),
      AvThermo = mean(thermometerDutch_1, na.rm = TRUE),
      AvWB = mean(ExWB_1, na.rm = TRUE)) %>%
    ungroup() %>%
    mutate(
      # grand-mean centered (_c) / standardized (_z) versions; as.numeric()
      # drops the 1-column matrix that scale() returns, which would otherwise
      # sit as a matrix column inside the data frame
      CtContactNL_c = as.numeric(scale(CtContactNL, scale = FALSE)),
      AvKeyNeedInt_c = as.numeric(scale(AvKeyNeedInt, scale = FALSE)),
      AvKeyNeed_c = as.numeric(scale(AvKeyNeed, scale = FALSE)),
      CtContactNL_z = as.numeric(scale(CtContactNL, scale = TRUE)),
      AvKeyNeedInt_z = as.numeric(scale(AvKeyNeedInt, scale = TRUE)),
      AvKeyNeed_z = as.numeric(scale(AvKeyNeed, scale = TRUE))
    ) 
  
  warning("some variable transformations (esp. _c and _z) might be across all participants (i.e., not within PP).")
  
  dtWorker$full <- between
  
  rm(df, between)
  
  # NOTE(review): `df.btw` below predates the rename to dtWorker$full
  #save(df.btw, file = "data/processed/df.btw.RData")  
  #write_sav(df.btw, "data/processed/MT_clean-merged_pre-post.sav")
  
# export data to Mplus
  # df.mplus = remove_all_labels(select(df, PID, session, thermometerDutch_1, inNonDutch, Contact_dum, keyMotiveFulfilled, autonomy.daily.all, competence.daily.all, relatedness.daily.all))
  # names(df.mplus)= c("PID", "session", "att", "intin", "intout", "keymot", "aut", "comp", "rel")
  # mplus = df.mplus[order(df.mplus$PID, df.mplus$session),]
  # mplus.intcont = mplus[mplus$intout==1,]
  # prepareMplusData(mplus.intcont, "data/processed/dynamic-subset-intonly.dat")
df <- dtStudents$clean

# Add ID variables: one numeric participant id per unique session string
df$PID <- as.numeric(factor(df$session))

# Order time: factor levels follow the order of appearance in the raw daily
# file, so the numeric TID reflects the survey-wave ordering
df$TID <- factor(df$date_period, levels = unique(dtStudents$raw.daily$date_period))
df$TIDnum <- as.numeric(df$TID)

# check whether time ordering worked (sort by participant, then wave)
df <- arrange(df, PID, TID)

# Interaction as factor plus dummy indicators for Dutch / non-Dutch partners
df$interaction.f <- factor(df$Interaction, levels = c("no interaction", "Dutch", "Non-Dutch"))
df$intNL <- ifelse(df$Interaction == "Dutch", 1, 0)
df$intNonNL <- ifelse(df$Interaction == "Non-Dutch", 1, 0)
   
# ------------------------------------------------------------------------------------------------------------- 
#                                       Combine Variables
# -------------------------------------------------------------------------------------------------------------
# Relatedness: inspect the two interaction-relatedness items, then average them
  pairs.panels.new(df[c("RelatednessSelf","RelatednessOther")], 
                              labels = c("I shared information about myself.", "X shared information about themselves."))

  df$RelatednessInteraction <- rowMeans(df[c("RelatednessSelf","RelatednessOther")], na.rm = TRUE)
  # rowMeans() returns NaN when all items are missing; recode those to NA.
  # is.nan() replaces the previous character comparison (== "NaN"), which only
  # worked through implicit numeric-to-character coercion.
  df$RelatednessInteraction[is.nan(df$RelatednessInteraction)] <- NA
# Relatedness overall: mean of interaction and no-interaction relatedness
  df$Relatedness <- rowMeans(df[,c("RelatednessInteraction", "RelatednessNoInteraction")], na.rm = TRUE)
# Pro-Sociality
  df$ProSo <- rowMeans(df[,c("ProSo1", "ProSo2", "ProSo3", "ProSo4")], na.rm = TRUE)
# Anti-Sociality
  df$AntiSo <- rowMeans(df[,c("AntiSo1", "AntiSo2", "AntiSo3", "AntiSo4")], na.rm = TRUE)


# ------------------------------------------------------------------------------------------------------------- 
#                                 Add Variables related to interaction partner
# -------------------------------------------------------------------------------------------------------------
# create function for later lapply
# Derive per-partner variables (closeness, gender, ethnicity, relationship)
# for one participant's daily rows.
#
# inp: data frame with one participant's rows, containing at least CC, NewCC,
#      NewName, NewCloseness, NewGender, NewEthnicity, NewRelationship.
# Returns the prepared data frame with CC2 (resolved partner name) and the
# four looked-up partner variables appended.
createIntPartDf <- function(inp) {
  
  # Collect the relevant columns; character conversion guards against factor
  # columns so the name matching below compares strings, not level codes.
    tmp <- data.frame(CC = as.character(inp$CC),
                    NewCC = as.character(inp$NewCC),
                    NewName = as.character(inp$NewName),
                    NewCloseness = inp$NewCloseness,
                    NewGender = inp$NewGender,
                    NewEthnicity = as.character(inp$NewEthnicity),
                    NewRelationship = as.character(inp$NewRelationship),
                    stringsAsFactors = FALSE)
    
  # Resolve the partner name: "SOMEONE ELSE" / code 1 rows take the newly
  # entered name (NewName), other rows keep the existing CC value.
  # NOTE(review): recode() writes the literal string "NA", not a real NA; it
  # is harmless only because the ifelse overwrites those rows with NewName.
    tmp$CC2 <- recode(tmp$CC, 'SOMEONE ELSE' = "NA")
    tmp$CC2 <- ifelse(tmp$CC == 1 | tmp$CC == "SOMEONE ELSE", as.character(tmp$NewName), as.character(tmp$CC2)) 
    # strip one leading whitespace so re-entered names match the first entry
    tmp$CC2 <- gsub("^[[:space:]]", "", tmp$CC2)
    tmp$NewName <- gsub("^[[:space:]]", "", tmp$NewName)
    
  # Preallocate the lookup results filled in by the loop below.
    tmp$closeness <- rep(NA, nrow(tmp))
    tmp$gender <- rep(NA, nrow(tmp))
    tmp$ethnicity <- rep(NA, nrow(tmp))
    tmp$relationship <- rep(NA, nrow(tmp))
  
    # For every row, look up the partner's attributes from the rows where that
    # partner was first described (CC2 matches NewName); na.omit() + [1] picks
    # the first non-missing hit. seq_len() is safe for zero-row input, unlike
    # the previous 1:nrow(tmp); the match vector is computed once per row
    # instead of four times.
      for (i in seq_len(nrow(tmp))) {
         if (is.na(tmp$CC2[i])) {
           next
         }
         hits <- as.character(tmp$CC2[i]) == as.character(tmp$NewName)
         tmp$closeness[i] <- na.omit(tmp$NewCloseness[hits])[1]
         tmp$gender[i] <- na.omit(tmp$NewGender[hits])[1]
         tmp$ethnicity[i] <- na.omit(as.character(tmp$NewEthnicity[hits]))[1] # many NAs expected here
         tmp$relationship[i] <- na.omit(as.character(tmp$NewRelationship[hits]))[1]
      }
      
    tmp
}

# split df per participant and run the partner-lookup function
  PP <- lapply(split(df, df$PID), createIntPartDf)
  rm(createIntPartDf)
  
# add the derived variables back, tagging them with a "_Calc" suffix
  remergePP <- do.call(rbind.data.frame, PP)
  colnames(remergePP) <- paste0(colnames(remergePP), "_Calc")
  df <- cbind(df, remergePP)
  rm(remergePP)
writeLines("Test whether newly calculate df and old df have the same order:")
## Test whether newly calculate df and old df have the same order:
print(table(as.character(df$CC_Calc) == as.character(df$CC)))
## 
## TRUE 
## 3270
writeLines("If we also add the NAs we get the exact number:")
## If we also add the NAs we get the exact number:
print(sum(is.na(df$CC_Calc))+table(as.character(df$CC_Calc) == as.character(df$CC)))
## 
## TRUE 
## 4965
# ------------------------------------------------------------------------------------------------------------- 
#                                 Center Relevant Variables
# -------------------------------------------------------------------------------------------------------------

df <- df %>%
  group_by(PID) %>% 
  mutate(
    # person (cluster) mean of key-need fulfillment
    KeyNeedFullfillment.cm = mean(KeyNeedFullfillment, na.rm = TRUE),
    # within-person deviation from the person mean (cluster-mean centered)
    KeyNeedFullfillment.cwc = KeyNeedFullfillment - KeyNeedFullfillment.cm,
    # same decomposition for the derived partner-closeness variable
    closeness.cm = mean(closeness_Calc, na.rm = TRUE),
    closeness.cwc = closeness_Calc - closeness.cm
  ) %>%  
  ungroup()

# store the fully prepared student data
  dtStudents$full <- df
# TBD

Worker Sample

Contact Hypothesis

Need Fulfillment

Student Sample

Contact Hypothesis

Need Fulfillment

Young Medical Professional Sample

Contact Hypothesis

Allport’s Conditions

Need Fulfillment

Software Information

The full session information with all relevant system information and all loaded and installed packages is available in the collapsible section below.

System Info
Table 1: R environment session info for reproducibility of results
Setting Value
version R version 4.1.1 (2021-08-10)
os macOS Big Sur 10.16
system x86_64, darwin17.0
ui X11
language (EN)
collate en_US.UTF-8
ctype en_US.UTF-8
tz Europe/Berlin
date 2021-10-11

Package Info
Table 2: Package info for reproducibility of results
Package Loaded version Date Source
bookdown 0.23 2021-08-13 CRAN (R 4.1.1)
data.table 1.14.0 2021-02-21 CRAN (R 4.1.0)
devtools 2.4.2 2021-06-07 CRAN (R 4.1.0)
dplyr 1.0.7 2021-06-18 CRAN (R 4.1.0)
ellipse 0.4.2 2020-05-27 CRAN (R 4.1.0)
Formula 1.2-4 2020-10-16 CRAN (R 4.1.0)
ggplot2 3.3.5 2021-06-25 CRAN (R 4.1.0)
ggthemes 4.2.4 2021-01-20 CRAN (R 4.1.0)
gridExtra 2.3 2017-09-09 CRAN (R 4.1.0)
haven 2.4.3 2021-08-04 CRAN (R 4.1.0)
Hmisc 4.5-0 2021-02-28 CRAN (R 4.1.0)
kableExtra 1.3.4 2021-02-20 CRAN (R 4.1.0)
knitr 1.36 2021-09-29 CRAN (R 4.1.0)
lattice 0.20-44 2021-05-02 CRAN (R 4.1.1)
lubridate 1.7.10 2021-02-26 CRAN (R 4.1.0)
mada 0.5.10 2020-05-25 CRAN (R 4.1.0)
mvmeta 1.0.3 2019-12-10 CRAN (R 4.1.0)
mvtnorm 1.1-2 2021-06-07 CRAN (R 4.1.0)
pander 0.6.4 2021-06-13 CRAN (R 4.1.0)
plotly 4.9.4.9000 2021-08-28 GitHub (development version; source repository not captured in the session info)
plyr 1.8.6 2020-03-03 CRAN (R 4.1.0)
psych 2.1.6 2021-06-18 CRAN (R 4.1.0)
RColorBrewer 1.1-2 2014-12-07 CRAN (R 4.1.0)
remedy 0.1.0 2018-12-03 CRAN (R 4.1.0)
reshape2 1.4.4 2020-04-09 CRAN (R 4.1.0)
rmarkdown 2.11 2021-09-14 CRAN (R 4.1.1)
sessioninfo 1.1.1 2018-11-05 CRAN (R 4.1.0)
stringi 1.7.5 2021-10-04 CRAN (R 4.1.1)
stringr 1.4.0 2019-02-10 CRAN (R 4.1.0)
survival 3.2-12 2021-08-13 CRAN (R 4.1.1)
tibble 3.1.5 2021-09-30 CRAN (R 4.1.0)
tidyr 1.1.4 2021-09-27 CRAN (R 4.1.0)
usethis 2.0.1 2021-02-10 CRAN (R 4.1.0)

Full Session Info (including loaded but unattached packages — for troubleshooting only)

R version 4.1.1 (2021-08-10)

Platform: x86_64-apple-darwin17.0 (64-bit)

locale: en_US.UTF-8||en_US.UTF-8||en_US.UTF-8||C||en_US.UTF-8||en_US.UTF-8

attached base packages:

  • stats
  • graphics
  • grDevices
  • datasets
  • utils
  • methods
  • base

other attached packages:

  • lubridate(v.1.7.10)
  • reshape2(v.1.4.4)
  • stringi(v.1.7.5)
  • stringr(v.1.4.0)
  • kableExtra(v.1.3.4)
  • Hmisc(v.4.5-0)
  • Formula(v.1.2-4)
  • survival(v.3.2-12)
  • lattice(v.0.20-44)
  • tidyr(v.1.1.4)
  • dplyr(v.1.0.7)
  • plyr(v.1.8.6)
  • data.table(v.1.14.0)
  • mada(v.0.5.10)
  • mvmeta(v.1.0.3)
  • ellipse(v.0.4.2)
  • mvtnorm(v.1.1-2)
  • devtools(v.2.4.2)
  • usethis(v.2.0.1)
  • pander(v.0.6.4)
  • tibble(v.3.1.5)
  • sessioninfo(v.1.1.1)
  • gridExtra(v.2.3)
  • plotly(v.4.9.4.9000)
  • RColorBrewer(v.1.1-2)
  • haven(v.2.4.3)
  • ggthemes(v.4.2.4)
  • ggplot2(v.3.3.5)
  • psych(v.2.1.6)
  • bookdown(v.0.23)
  • remedy(v.0.1.0)
  • knitr(v.1.36)
  • rmarkdown(v.2.11)

loaded via a namespace (and not attached):

  • colorspace(v.2.0-2)
  • ellipsis(v.0.3.2)
  • rprojroot(v.2.0.2)
  • htmlTable(v.2.2.1)
  • base64enc(v.0.1-3)
  • fs(v.1.5.0)
  • rstudioapi(v.0.13)
  • remotes(v.2.4.0)
  • fansi(v.0.5.0)
  • xml2(v.1.3.2)
  • splines(v.4.1.1)
  • mnormt(v.2.0.2)
  • cachem(v.1.0.6)
  • pkgload(v.1.2.1)
  • jsonlite(v.1.7.2)
  • cluster(v.2.1.2)
  • png(v.0.1-7)
  • readr(v.2.0.2)
  • compiler(v.4.1.1)
  • httr(v.1.4.2)
  • backports(v.1.2.1)
  • assertthat(v.0.2.1)
  • Matrix(v.1.3-4)
  • fastmap(v.1.1.0)
  • lazyeval(v.0.2.2)
  • cli(v.3.0.1)
  • htmltools(v.0.5.2)
  • prettyunits(v.1.1.1)
  • tools(v.4.1.1)
  • gtable(v.0.3.0)
  • glue(v.1.4.2)
  • Rcpp(v.1.0.7)
  • jquerylib(v.0.1.4)
  • vctrs(v.0.3.8)
  • mixmeta(v.1.1.3)
  • svglite(v.2.0.0)
  • nlme(v.3.1-152)
  • xfun(v.0.26)
  • ps(v.1.6.0)
  • testthat(v.3.0.4)
  • rvest(v.1.0.1)
  • lifecycle(v.1.0.1)
  • renv(v.0.14.0)
  • scales(v.1.1.1)
  • hms(v.1.1.1)
  • parallel(v.4.1.1)
  • rematch2(v.2.1.2)
  • yaml(v.2.2.1)
  • memoise(v.2.0.0)
  • sass(v.0.4.0)
  • rpart(v.4.1-15)
  • latticeExtra(v.0.6-29)
  • highr(v.0.9)
  • desc(v.1.3.0)
  • checkmate(v.2.0.0)
  • pkgbuild(v.1.2.0)
  • chron(v.2.3-56)
  • rlang(v.0.4.11)
  • pkgconfig(v.2.0.3)
  • systemfonts(v.1.0.2)
  • evaluate(v.0.14)
  • purrr(v.0.3.4)
  • htmlwidgets(v.1.5.4)
  • processx(v.3.5.2)
  • tidyselect(v.1.1.1)
  • magrittr(v.2.0.1)
  • R6(v.2.5.1)
  • generics(v.0.1.0)
  • DBI(v.1.1.1)
  • pillar(v.1.6.3)
  • foreign(v.0.8-81)
  • withr(v.2.4.2)
  • nnet(v.7.3-16)
  • crayon(v.1.4.1)
  • utf8(v.1.2.2)
  • tmvnsim(v.1.0-2)
  • tzdb(v.0.1.2)
  • jpeg(v.0.1-9)
  • grid(v.4.1.1)
  • callr(v.3.7.0)
  • forcats(v.0.5.1)
  • digest(v.0.6.28)
  • webshot(v.0.5.2)
  • munsell(v.0.5.0)
  • viridisLite(v.0.4.0)
  • bslib(v.0.3.0)


References